var runtime.work
233 uses
runtime (current package)
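For orientation before the list: the accesses below imply the rough shape of work, which is declared as var work workType at mgc.go#L316. The sketch that follows is reconstructed from those accesses only; the field types, the groupings, and the omissions (e.g. cpuStats) are assumptions, not the runtime's actual declaration.

package main

import (
	"fmt"
	"sync"
)

// workTypeSketch is a hypothetical reconstruction of runtime's workType,
// inferred from the uses listed on this page. Types are guesses (e.g.
// atomic.Xadd64 on bytesMarked implies uint64); the real declaration in
// runtime/mgc.go uses runtime-internal types (mutex, lfstack, gQueue, ...).
type workTypeSketch struct {
	full, empty uintptr // lock-free stacks (lfstack) of mark work buffers

	wbufSpans struct { // spans backing the work buffers
		lock       sync.Mutex // the runtime uses its own ranked mutex
		free, busy []uintptr  // mSpanList in the real code
	}

	bytesMarked uint64 // bytes marked this cycle, updated via atomic.Xadd64

	markrootNext uint32 // next root job to claim (atomically incremented)
	markrootJobs uint32 // total root-marking jobs this cycle

	nproc uint32 // number of mark workers
	nwait uint32 // workers currently waiting for work

	// Root job layout: fixed roots, then data, BSS, span, stack segments.
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots    int
	baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
	stackRoots                                        []uintptr // []*g in the real code

	startSema, markDoneSema uint32 // serialize cycle start / mark termination

	cycles     uint32 // completed-cycle counter (atomic.Uint32 in the real code)
	userForced bool   // cycle was forced by runtime.GC()
	mode       int    // gcMode

	// Pause accounting and per-phase timestamps (nanoseconds).
	pauseNS                                    int64
	tstart, tSweepTerm, tMark, tMarkTerm, tEnd int64
	stwprocs, maxprocs                         int32
	heap0, heap1, heap2, initialHeapLive       uint64

	sweepWaiters struct { // runtime.GC callers waiting for sweep to finish
		lock sync.Mutex
		list []uintptr // gList
	}
	assistQueue struct { // mutator assists parked waiting for mark credit
		lock sync.Mutex
		q    []uintptr // gQueue
	}
	strongFromWeak struct { // weak->strong conversions gated at mark termination
		lock  sync.Mutex
		q     []uintptr // gQueue
		block bool
	}
	// cpuStats and a few other fields seen below are omitted here.
}

func main() {
	var w workTypeSketch
	fmt.Println(w.cycles, w.markrootJobs) // zero-value shape check only
}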
metrics.go#L674: a.cpuStats = work.cpuStats
mgc.go#L189: work.startSema = 1
mgc.go#L190: work.markDoneSema = 1
mgc.go#L191: lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
mgc.go#L192: lockInit(&work.assistQueue.lock, lockRankAssistQueue)
mgc.go#L193: lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
mgc.go#L194: lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
mgc.go#L316: var work workType
mgc.go#L494: n := work.cycles.Load()
mgc.go#L509: for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
mgc.go#L524: for work.cycles.Load() == n+1 && !isSweepDone() {
mgc.go#L532: cycle := work.cycles.Load()
mgc.go#L544: lock(&work.sweepWaiters.lock)
mgc.go#L545: nMarks := work.cycles.Load()
mgc.go#L552: unlock(&work.sweepWaiters.lock)
mgc.go#L558: work.sweepWaiters.list.push(getg())
mgc.go#L559: goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceBlockUntilGCEnds, 1)
mgc.go#L618: return int32(t.n-work.cycles.Load()) > 0
mgc.go#L657: semacquire(&work.startSema)
mgc.go#L660: semrelease(&work.startSema)
mgc.go#L681: work.userForced = trigger.kind == gcTriggerCycle
mgc.go#L701: work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
mgc.go#L702: if work.stwprocs > ncpu {
mgc.go#L705: work.stwprocs = ncpu
mgc.go#L707: work.heap0 = gcController.heapLive.Load()
mgc.go#L708: work.pauseNS = 0
mgc.go#L709: work.mode = mode
mgc.go#L712: work.tSweepTerm = now
mgc.go#L719: work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
mgc.go#L730: work.cycles.Add(1)
mgc.go#L788: work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
mgc.go#L793: work.pauseNS += now - stw.startedStopping
mgc.go#L794: work.tMark = now
mgc.go#L813: semrelease(&work.startSema)
mgc.go#L861: semacquire(&work.markDoneSema)
mgc.go#L870: if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
mgc.go#L871: semrelease(&work.markDoneSema)
mgc.go#L881: work.strongFromWeak.block = true
mgc.go#L922: work.tMarkTerm = now
mgc.go#L933: work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
mgc.go#L960: work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
mgc.go#L964: work.pauseNS += now - stw.startedStopping
mgc.go#L985: work.strongFromWeak.block = false
mgc.go#L991: semrelease(&work.markDoneSema)
mgc.go#L1000: gcController.endCycle(now, int(gomaxprocs), work.userForced)
mgc.go#L1012: work.heap1 = gcController.heapLive.Load()
mgc.go#L1042: work.heap2 = work.bytesMarked
mgc.go#L1059: stwSwept = gcSweep(work.mode)
mgc.go#L1089: work.pauseNS += now - stw.startedStopping
mgc.go#L1090: work.tEnd = now
mgc.go#L1093: memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
mgc.go#L1095: memstats.pause_total_ns += uint64(work.pauseNS)
mgc.go#L1105: work.cpuStats.accumulateGCPauseTime(now-stw.finishedStopping, work.maxprocs)
mgc.go#L1106: work.cpuStats.accumulate(now, true)
mgc.go#L1110: memstats.gc_cpu_fraction = float64(work.cpuStats.GCTotalTime-work.cpuStats.GCIdleTime) / float64(work.cpuStats.TotalTime)
mgc.go#L1122: if work.userForced {
mgc.go#L1127: lock(&work.sweepWaiters.lock)
mgc.go#L1129: injectglist(&work.sweepWaiters.list)
mgc.go#L1130: unlock(&work.sweepWaiters.lock)
mgc.go#L1232: " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
mgc.go#L1234: prev := work.tSweepTerm
mgc.go#L1235: for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
mgc.go#L1244: int64(work.stwprocs) * (work.tMark - work.tSweepTerm),
mgc.go#L1248: int64(work.stwprocs) * (work.tEnd - work.tMarkTerm),
mgc.go#L1259: work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
mgc.go#L1263: work.maxprocs, " P")
mgc.go#L1264: if work.userForced {
mgc.go#L1358: work.nproc = ^uint32(0)
mgc.go#L1359: work.nwait = ^uint32(0)
mgc.go#L1466: decnwait := atomic.Xadd(&work.nwait, -1)
mgc.go#L1467: if decnwait == work.nproc {
mgc.go#L1468: println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
mgc.go#L1526: incnwait := atomic.Xadd(&work.nwait, +1)
mgc.go#L1527: if incnwait > work.nproc {
mgc.go#L1529: "work.nwait=", incnwait, "work.nproc=", work.nproc)
mgc.go#L1541: if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
mgc.go#L1560: if !work.full.empty() {
mgc.go#L1563: if work.markrootNext < work.markrootJobs {
mgc.go#L1576: work.tstart = startTime
mgc.go#L1579: if work.full != 0 || work.markrootNext < work.markrootJobs {
mgc.go#L1580: print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
mgc.go#L1593: work.stackRoots = nil
mgc.go#L1654: gcController.resetLive(work.bytesMarked)
mgc.go#L1746: work.bytesMarked = 0
mgc.go#L1747: work.initialHeapLive = gcController.heapLive.Load()
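The mgc.go uses above walk one GC cycle's bookkeeping: work.cycles is incremented, the phase timestamps (tSweepTerm, tMark, tMarkTerm, tEnd) are recorded, and work.pauseNS accumulates both stop-the-world windows before being stored into memstats.pause_ns, a fixed-size ring buffer indexed by the cycle count (mgc.go#L1093). A minimal standalone sketch of that ring-buffer recording follows; the names and the buffer size are hypothetical.

package main

import "fmt"

// gcStatsSketch is a simplified, hypothetical mirror of the pause
// bookkeeping seen above: each cycle's total stop-the-world time lands
// in a ring buffer, so only the most recent len(pauseNS) pauses survive.
type gcStatsSketch struct {
	numGC   uint32
	pauseNS [256]uint64 // memstats.pause_ns is likewise a fixed-size array
}

// recordCycle sums the two STW windows of one cycle, mirroring the two
// "work.pauseNS += now - stw.startedStopping" updates above.
func (s *gcStatsSketch) recordCycle(stwSweepTerm, stwMarkTerm int64) {
	pause := stwSweepTerm + stwMarkTerm
	s.pauseNS[s.numGC%uint32(len(s.pauseNS))] = uint64(pause)
	s.numGC++
}

func main() {
	var s gcStatsSketch
	s.recordCycle(120_000, 80_000)     // two STW windows, in nanoseconds
	fmt.Println(s.pauseNS[0], s.numGC) // 200000 1
}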
mgcmark.go#L66: work.nDataRoots = 0
mgcmark.go#L67: work.nBSSRoots = 0
mgcmark.go#L72: if nDataRoots > work.nDataRoots {
mgcmark.go#L73: work.nDataRoots = nDataRoots
mgcmark.go#L77: if nBSSRoots > work.nBSSRoots {
mgcmark.go#L78: work.nBSSRoots = nBSSRoots
mgcmark.go#L95: work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
mgcmark.go#L103: work.stackRoots = allGsSnapshot()
mgcmark.go#L104: work.nStackRoots = len(work.stackRoots)
mgcmark.go#L106: work.markrootNext = 0
mgcmark.go#L107: work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
mgcmark.go#L110: work.baseData = uint32(fixedRootCount)
mgcmark.go#L111: work.baseBSS = work.baseData + uint32(work.nDataRoots)
mgcmark.go#L112: work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
mgcmark.go#L113: work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
mgcmark.go#L114: work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
mgcmark.go#L120: if work.markrootNext < work.markrootJobs {
mgcmark.go#L121: print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
mgcmark.go#L132: if i >= work.nStackRoots {
mgcmark.go#L166: case work.baseData <= i && i < work.baseBSS:
mgcmark.go#L169: workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
mgcmark.go#L172: case work.baseBSS <= i && i < work.baseSpans:
mgcmark.go#L175: workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
mgcmark.go#L189: case work.baseSpans <= i && i < work.baseStacks:
mgcmark.go#L191: markrootSpans(gcw, int(i-work.baseSpans))
mgcmark.go#L196: if i < work.baseStacks || work.baseEnd <= i {
mgcmark.go#L198: print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
mgcmark.go#L201: gp := work.stackRoots[i-work.baseStacks]
mgcmark.go#L207: gp.waitsince = work.tstart
mgcmark.go#L641: decnwait := atomic.Xadd(&work.nwait, -1)
mgcmark.go#L642: if decnwait == work.nproc {
mgcmark.go#L643: println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
mgcmark.go#L668: incnwait := atomic.Xadd(&work.nwait, +1)
mgcmark.go#L669: if incnwait > work.nproc {
mgcmark.go#L671: "work.nproc=", work.nproc)
mgcmark.go#L675: if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
mgcmark.go#L700: lock(&work.assistQueue.lock)
mgcmark.go#L701: list := work.assistQueue.q.popList()
mgcmark.go#L703: unlock(&work.assistQueue.lock)
mgcmark.go#L711: lock(&work.assistQueue.lock)
mgcmark.go#L716: unlock(&work.assistQueue.lock)
mgcmark.go#L721: oldList := work.assistQueue.q
mgcmark.go#L722: work.assistQueue.q.pushBack(gp)
mgcmark.go#L729: work.assistQueue.q = oldList
mgcmark.go#L733: unlock(&work.assistQueue.lock)
mgcmark.go#L737: goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
mgcmark.go#L752: if work.assistQueue.q.empty() {
mgcmark.go#L764: lock(&work.assistQueue.lock)
mgcmark.go#L765: for !work.assistQueue.q.empty() && scanBytes > 0 {
mgcmark.go#L766: gp := work.assistQueue.q.pop()
mgcmark.go#L788: work.assistQueue.q.pushBack(gp)
mgcmark.go#L799: unlock(&work.assistQueue.lock)
mgcmark.go#L1180: if work.markrootNext < work.markrootJobs {
mgcmark.go#L1184: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1185: if job >= work.markrootJobs {
mgcmark.go#L1211: if work.full == 0 {
mgcmark.go#L1291: if work.full == 0 {
mgcmark.go#L1308: if work.markrootNext < work.markrootJobs {
mgcmark.go#L1309: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1310: if job < work.markrootJobs {
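The mgcmark.go uses show root marking expressed as a flat job index space: setup computes work.markrootJobs as fixedRootCount + nDataRoots + nBSSRoots + nSpanRoots + nStackRoots, the base* fields partition that range (the #L110-#L114 entries above), and workers claim indices with atomic.Xadd(&work.markrootNext, +1) - 1 before dispatching on whichever segment the index falls in (#L1184, #L166-#L201). Below is a compilable sketch of that scheme; the type names, the counts, and the fixedRootCount value are hypothetical.

package main

import (
	"fmt"
	"sync/atomic"
)

const fixedRootCount = 2 // hypothetical; the runtime defines its own fixed roots

// rootJobs partitions the flat index space [0, jobs), mirroring
// work.baseData .. work.baseEnd in the listing above.
type rootJobs struct {
	next                                     atomic.Uint32 // work.markrootNext
	jobs                                     uint32        // work.markrootJobs
	baseData, baseBSS, baseSpans, baseStacks uint32
	baseEnd                                  uint32
}

func makeRootJobs(nData, nBSS, nSpans, nStacks uint32) *rootJobs {
	r := &rootJobs{}
	r.baseData = fixedRootCount
	r.baseBSS = r.baseData + nData
	r.baseSpans = r.baseBSS + nBSS
	r.baseStacks = r.baseSpans + nSpans
	r.baseEnd = r.baseStacks + nStacks
	r.jobs = r.baseEnd
	return r
}

// claimAndDispatch mimics markroot's switch over the index ranges.
// It reports false once every root job has been claimed.
func (r *rootJobs) claimAndDispatch() bool {
	i := r.next.Add(1) - 1 // atomic.Xadd(&work.markrootNext, +1) - 1
	if i >= r.jobs {
		return false
	}
	switch {
	case i < r.baseData:
		fmt.Println(i, "fixed root")
	case i < r.baseBSS:
		fmt.Println(i, "data root", i-r.baseData)
	case i < r.baseSpans:
		fmt.Println(i, "BSS root", i-r.baseBSS)
	case i < r.baseStacks:
		fmt.Println(i, "span root", i-r.baseSpans)
	default:
		fmt.Println(i, "stack root", i-r.baseStacks)
	}
	return true
}

func main() {
	r := makeRootJobs(1, 1, 2, 3)
	for r.claimAndDispatch() {
	}
}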
mgcpacer.go#L461: work.initialHeapLive>>20, "->",
mgcpacer.go#L1271: gcWaitOnMark(work.cycles.Load())
mgcwork.go#L118: lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
mgcwork.go#L275: atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
mgcwork.go#L354: if work.empty != 0 {
mgcwork.go#L355: b = (*workbuf)(work.empty.pop())
mgcwork.go#L362: lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
mgcwork.go#L367: if work.wbufSpans.free.first != nil {
mgcwork.go#L368: lock(&work.wbufSpans.lock)
mgcwork.go#L369: s = work.wbufSpans.free.first
mgcwork.go#L371: work.wbufSpans.free.remove(s)
mgcwork.go#L372: work.wbufSpans.busy.insert(s)
mgcwork.go#L374: unlock(&work.wbufSpans.lock)
mgcwork.go#L384: lock(&work.wbufSpans.lock)
mgcwork.go#L385: work.wbufSpans.busy.insert(s)
mgcwork.go#L386: unlock(&work.wbufSpans.lock)
mgcwork.go#L410: work.empty.push(&b.node)
mgcwork.go#L420: work.full.push(&b.node)
mgcwork.go#L428: b := (*workbuf)(work.full.pop())
mgcwork.go#L454: lock(&work.wbufSpans.lock)
mgcwork.go#L455: if work.full != 0 {
mgcwork.go#L461: work.empty = 0
mgcwork.go#L462: work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
mgcwork.go#L463: unlock(&work.wbufSpans.lock)
mgcwork.go#L470: lock(&work.wbufSpans.lock)
mgcwork.go#L471: if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
mgcwork.go#L472: unlock(&work.wbufSpans.lock)
mgcwork.go#L478: span := work.wbufSpans.free.first
mgcwork.go#L482: work.wbufSpans.free.remove(span)
mgcwork.go#L486: more := !work.wbufSpans.free.isEmpty()
mgcwork.go#L487: unlock(&work.wbufSpans.lock)
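The mgcwork.go uses trace the two-list recycling scheme for mark work buffers: empty buffers are popped from and pushed onto work.empty (#L355, #L410), filled buffers move through work.full (#L420, #L428), and the spans backing them cycle between work.wbufSpans.free and .busy under work.wbufSpans.lock. The runtime's lists are lock-free stacks (lfstack); the sketch below substitutes a mutex-guarded slice to stay self-contained, and all names are hypothetical.

package main

import (
	"fmt"
	"sync"
)

// workbufSketch stands in for the runtime's workbuf; the real one is a
// fixed-size buffer of pointers plus an intrusive lfstack node.
type workbufSketch struct {
	objs []uintptr
}

// bufStacks mirrors work.empty and work.full.
type bufStacks struct {
	mu          sync.Mutex
	empty, full []*workbufSketch
}

// getempty pops a cached empty buffer, allocating if none is available.
func (s *bufStacks) getempty() *workbufSketch {
	s.mu.Lock()
	defer s.mu.Unlock()
	if n := len(s.empty); n > 0 {
		b := s.empty[n-1]
		s.empty = s.empty[:n-1]
		return b
	}
	return &workbufSketch{}
}

// putfull publishes a filled buffer for other workers to drain.
func (s *bufStacks) putfull(b *workbufSketch) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.full = append(s.full, b)
}

// trygetfull pops a filled buffer, or returns nil if none is pending.
func (s *bufStacks) trygetfull() *workbufSketch {
	s.mu.Lock()
	defer s.mu.Unlock()
	if n := len(s.full); n > 0 {
		b := s.full[n-1]
		s.full = s.full[:n-1]
		return b
	}
	return nil
}

func main() {
	var s bufStacks
	b := s.getempty()
	b.objs = append(b.objs, 0xdeadbeef) // "scan" one object into the buffer
	s.putfull(b)
	fmt.Println(len(s.trygetfull().objs)) // 1
}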
mheap.go#L2057: if work.strongFromWeak.block {
mheap.go#L2111: for work.strongFromWeak.block {
mheap.go#L2112: lock(&work.strongFromWeak.lock)
mheap.go#L2116: work.strongFromWeak.q.pushBack(getg())
mheap.go#L2119: goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)
mheap.go#L2135: lock(&work.strongFromWeak.lock)
mheap.go#L2136: list := work.strongFromWeak.q.popList()
mheap.go#L2138: unlock(&work.strongFromWeak.lock)
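The mheap.go uses complete the strongFromWeak picture: mark termination sets work.strongFromWeak.block (mgc.go#L881) and clears it afterwards (mgc.go#L985); in between, goroutines converting weak pointers to strong ones park on work.strongFromWeak.q (mheap.go#L2111-L2119), and the GC later pops the whole list to wake them (mheap.go#L2135-L2138). The runtime parks with goparkunlock and wakes via the popped gQueue; the following is a condition-variable sketch of the same gate, with hypothetical names.

package main

import (
	"fmt"
	"sync"
)

// strongFromWeakGate is a hypothetical stand-in for work.strongFromWeak.
// sync.Cond gives the same block-until-unblocked shape as parking on a
// gQueue and being woken by injectglist.
type strongFromWeakGate struct {
	mu    sync.Mutex
	cond  *sync.Cond
	block bool // true while the GC forbids weak->strong conversion
}

func newGate() *strongFromWeakGate {
	g := &strongFromWeakGate{}
	g.cond = sync.NewCond(&g.mu)
	return g
}

// waitIfBlocked mirrors the loop at mheap.go#L2111: re-check block after
// every wakeup, since the gate may have closed again.
func (g *strongFromWeakGate) waitIfBlocked() {
	g.mu.Lock()
	for g.block {
		g.cond.Wait()
	}
	g.mu.Unlock()
}

// setBlock opens or closes the gate; clearing it releases all waiters,
// like popping work.strongFromWeak.q and injecting the list.
func (g *strongFromWeakGate) setBlock(b bool) {
	g.mu.Lock()
	g.block = b
	g.mu.Unlock()
	if !b {
		g.cond.Broadcast()
	}
}

func main() {
	g := newGate()
	g.setBlock(true) // mark termination begins
	done := make(chan struct{})
	go func() {
		g.waitIfBlocked() // parks until the GC clears block
		close(done)
	}()
	g.setBlock(false) // mark termination ends; wake waiters
	<-done
	fmt.Println("weak->strong conversion may proceed")
}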